From d6d2b0e1538d5c381ec0ca95afaf772c096ea5dc Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 15 May 2025 08:33:06 +0200
Subject: [PATCH] net: airoha: Fix page recycling in airoha_qdma_rx_process()

Do not recycle the page twice in airoha_qdma_rx_process routine in case
of error. Just run dev_kfree_skb() if the skb has been allocated and marked
for recycling. Run page_pool_put_full_page() directly if the skb has not
been allocated yet.
Moreover, rely on DMA address from queue entry element instead of reading
it from the DMA descriptor for DMA syncing in airoha_qdma_rx_process().

Fixes: e12182ddb6e71 ("net: airoha: Enable Rx Scatter-Gather")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/20250515-airoha-fix-rx-process-error-condition-v2-1-657e92c894b9@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/airoha/airoha_eth.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -636,7 +636,6 @@ static int airoha_qdma_rx_process(struct
 		struct airoha_queue_entry *e = &q->entry[q->tail];
 		struct airoha_qdma_desc *desc = &q->desc[q->tail];
 		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
-		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
 		struct page *page = virt_to_head_page(e->buf);
 		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
 		struct airoha_gdm_port *port;
@@ -645,22 +644,16 @@ static int airoha_qdma_rx_process(struct
 		if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
 			break;
 
-		if (!dma_addr)
-			break;
-
-		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
-		if (!len)
-			break;
-
 		q->tail = (q->tail + 1) % q->ndesc;
 		q->queued--;
 
-		dma_sync_single_for_cpu(eth->dev, dma_addr,
+		dma_sync_single_for_cpu(eth->dev, e->dma_addr,
 					SKB_WITH_OVERHEAD(q->buf_size), dir);
 
+		len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
 		data_len = q->skb ? q->buf_size
 				  : SKB_WITH_OVERHEAD(q->buf_size);
-		if (data_len < len)
+		if (!len || data_len < len)
 			goto free_frag;
 
 		p = airoha_qdma_get_gdm_port(eth, desc);
@@ -723,9 +716,12 @@ static int airoha_qdma_rx_process(struct
 		q->skb = NULL;
 		continue;
 free_frag:
-		page_pool_put_full_page(q->page_pool, page, true);
-		dev_kfree_skb(q->skb);
-		q->skb = NULL;
+		if (q->skb) {
+			dev_kfree_skb(q->skb);
+			q->skb = NULL;
+		} else {
+			page_pool_put_full_page(q->page_pool, page, true);
+		}
 	}
 	airoha_qdma_fill_rx_queue(q);
 
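
Reader's note (not part of the patch to be applied): the free_frag hunk above
boils down to releasing the current fragment exactly once. A minimal C sketch
of that logic follows; it assumes the driver's struct airoha_queue fields
(q->skb, q->page_pool) from airoha_eth.h together with <linux/skbuff.h> and
<net/page_pool/helpers.h>, and the helper name airoha_rx_drop_frag() is
hypothetical, since the real code runs inline under the free_frag label.

/* Hypothetical helper illustrating the fixed free_frag error path. */
static void airoha_rx_drop_frag(struct airoha_queue *q, struct page *page)
{
	if (q->skb) {
		/*
		 * The skb already owns this page_pool page and has been
		 * marked for recycling, so dev_kfree_skb() returns the page
		 * to the pool as a side effect; calling
		 * page_pool_put_full_page() here as well would recycle the
		 * same page twice.
		 */
		dev_kfree_skb(q->skb);
		q->skb = NULL;
	} else {
		/* No skb built yet: hand the raw page back to the pool. */
		page_pool_put_full_page(q->page_pool, page, true);
	}
}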